HAMMER 54D/Many: Performance tuning.
[dragonfly.git] / sys / vfs / hammer / hammer_object.c
blob 4ea25b2dd4c359b0637de4599d631c9e4115483b

/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_object.c,v 1.67 2008/06/13 00:25:33 dillon Exp $
 */

#include "hammer.h"

static int hammer_mem_add(hammer_record_t record);
static int hammer_mem_lookup(hammer_cursor_t cursor);
static int hammer_mem_first(hammer_cursor_t cursor);
static int hammer_rec_trunc_callback(hammer_record_t record,
                                void *data __unused);

struct rec_trunc_info {
        u_int16_t       rec_type;
        int64_t         trunc_off;
};

/*
 * Red-black tree support.  Comparison code for insertion.
 */
static int
hammer_rec_rb_compare(hammer_record_t rec1, hammer_record_t rec2)
{
        if (rec1->leaf.base.rec_type < rec2->leaf.base.rec_type)
                return(-1);
        if (rec1->leaf.base.rec_type > rec2->leaf.base.rec_type)
                return(1);

        if (rec1->leaf.base.key < rec2->leaf.base.key)
                return(-1);
        if (rec1->leaf.base.key > rec2->leaf.base.key)
                return(1);

#if 0
        /*
         * XXX create_tid is set during sync, memory records are always
         * current.  Do not match against create_tid.
         */
        if (rec1->leaf.base.create_tid == 0) {
                if (rec2->leaf.base.create_tid == 0)
                        return(0);
                return(1);
        }
        if (rec2->leaf.base.create_tid == 0)
                return(-1);
        if (rec1->leaf.base.create_tid < rec2->leaf.base.create_tid)
                return(-1);
        if (rec1->leaf.base.create_tid > rec2->leaf.base.create_tid)
                return(1);
#endif

        /*
         * Never match against an item deleted by the front-end.
         */
        if (rec1->flags & HAMMER_RECF_DELETED_FE)
                return(1);
        if (rec2->flags & HAMMER_RECF_DELETED_FE)
                return(-1);

        return(0);
}

/*
 * Basic record comparison code similar to hammer_btree_cmp().
 */
static int
hammer_rec_cmp(hammer_base_elm_t elm, hammer_record_t rec)
{
        if (elm->rec_type < rec->leaf.base.rec_type)
                return(-3);
        if (elm->rec_type > rec->leaf.base.rec_type)
                return(3);

        if (elm->key < rec->leaf.base.key)
                return(-2);
        if (elm->key > rec->leaf.base.key)
                return(2);

#if 0
        /*
         * XXX create_tid is set during sync, memory records are always
         * current.  Do not match against create_tid.
         */
        if (elm->create_tid == 0) {
                if (rec->leaf.base.create_tid == 0)
                        return(0);
                return(1);
        }
        if (rec->leaf.base.create_tid == 0)
                return(-1);
        if (elm->create_tid < rec->leaf.base.create_tid)
                return(-1);
        if (elm->create_tid > rec->leaf.base.create_tid)
                return(1);
#endif

        /*
         * Never match against an item deleted by the front-end.
         */
        if (rec->flags & HAMMER_RECF_DELETED_FE)
                return(1);
        return(0);
}

/*
 * Special LOOKUP_INFO to locate an overlapping record.  This is used by
 * the reservation code to implement small-block records (whose keys will
 * be different depending on data_len, when representing the same base
 * offset).
 *
 * NOTE: The base file offset of a data record is (key - data_len), not (key).
 */
static int
hammer_rec_overlap_compare(hammer_btree_leaf_elm_t leaf, hammer_record_t rec)
{
        if (leaf->base.rec_type < rec->leaf.base.rec_type)
                return(-3);
        if (leaf->base.rec_type > rec->leaf.base.rec_type)
                return(3);

        if (leaf->base.rec_type == HAMMER_RECTYPE_DATA) {
                /* leaf_end <= rec_beg */
                if (leaf->base.key <= rec->leaf.base.key - rec->leaf.data_len)
                        return(-2);
                /* leaf_beg >= rec_end */
                if (leaf->base.key - leaf->data_len >= rec->leaf.base.key)
                        return(2);
        } else {
                if (leaf->base.key < rec->leaf.base.key)
                        return(-2);
                if (leaf->base.key > rec->leaf.base.key)
                        return(2);
        }

#if 0
        if (leaf->base.create_tid == 0) {
                if (rec->leaf.base.create_tid == 0)
                        return(0);
                return(1);
        }
        if (rec->leaf.base.create_tid == 0)
                return(-1);
        if (leaf->base.create_tid < rec->leaf.base.create_tid)
                return(-1);
        if (leaf->base.create_tid > rec->leaf.base.create_tid)
                return(1);
#endif

        /*
         * Never match against an item deleted by the front-end.
         */
        if (rec->flags & HAMMER_RECF_DELETED_FE)
                return(1);
        return(0);
}
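
/*
 * Worked example of the DATA overlap test above, with illustrative
 * numbers: a record covering file bytes [0, 16384) has key 16384 and
 * data_len 16384 (base offset = key - data_len = 0).  A lookup leaf
 * with key 8192 and data_len 4096 covers [4096, 8192).  Neither
 * disjoint test fires (8192 <= 0 is false, 4096 >= 16384 is false),
 * so the records are considered overlapping and the compare falls
 * through to return 0 (a match).
 */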

/*
 * RB_SCAN comparison code for hammer_mem_first().  The argument order
 * is reversed so the comparison result has to be negated.  key_beg and
 * key_end are both range-inclusive.
 *
 * Localized deletions are not cached in-memory.
 */
static
int
hammer_rec_scan_cmp(hammer_record_t rec, void *data)
{
        hammer_cursor_t cursor = data;
        int r;

        r = hammer_rec_cmp(&cursor->key_beg, rec);
        if (r > 1)
                return(-1);
        r = hammer_rec_cmp(&cursor->key_end, rec);
        if (r < -1)
                return(1);
        return(0);
}

/*
 * This compare function is used when simply looking up key_beg.
 */
static
int
hammer_rec_find_cmp(hammer_record_t rec, void *data)
{
        hammer_cursor_t cursor = data;
        int r;

        r = hammer_rec_cmp(&cursor->key_beg, rec);
        if (r > 1)
                return(-1);
        if (r < -1)
                return(1);
        return(0);
}

/*
 * Locate blocks within the truncation range.  Partial blocks do not count.
 */
static
int
hammer_rec_trunc_cmp(hammer_record_t rec, void *data)
{
        struct rec_trunc_info *info = data;

        if (rec->leaf.base.rec_type < info->rec_type)
                return(-1);
        if (rec->leaf.base.rec_type > info->rec_type)
                return(1);

        switch(rec->leaf.base.rec_type) {
        case HAMMER_RECTYPE_DB:
                /*
                 * DB record key is not beyond the truncation point, retain.
                 */
                if (rec->leaf.base.key < info->trunc_off)
                        return(-1);
                break;
        case HAMMER_RECTYPE_DATA:
                /*
                 * DATA record offset start is not beyond the truncation point,
                 * retain.
                 */
                if (rec->leaf.base.key - rec->leaf.data_len < info->trunc_off)
                        return(-1);
                break;
        default:
                panic("hammer_rec_trunc_cmp: unexpected record type");
        }

        /*
         * The record start is >= the truncation point, return match,
         * the record should be destroyed.
         */
        return(0);
}
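
/*
 * Example, with illustrative numbers: truncating to trunc_off 16384,
 * a DATA record covering [0, 16384) (key 16384, data_len 16384) starts
 * at offset 0, below the truncation point, so it compares -1 and is
 * retained.  A record covering [16384, 32768) starts exactly at the
 * truncation point and compares 0, marking it for destruction.
 */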

RB_GENERATE(hammer_rec_rb_tree, hammer_record, rb_node, hammer_rec_rb_compare);
RB_GENERATE_XLOOKUP(hammer_rec_rb_tree, INFO, hammer_record, rb_node,
                    hammer_rec_overlap_compare, hammer_btree_leaf_elm_t);

/*
 * Allocate a record for the caller to finish filling in.  The record is
 * returned referenced.
 */
hammer_record_t
hammer_alloc_mem_record(hammer_inode_t ip, int data_len)
{
        hammer_record_t record;

        ++hammer_count_records;
        record = kmalloc(sizeof(*record), M_HAMMER, M_WAITOK | M_ZERO);
        record->flush_state = HAMMER_FST_IDLE;
        record->ip = ip;
        record->leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
        record->leaf.data_len = data_len;
        hammer_ref(&record->lock);

        if (data_len) {
                record->data = kmalloc(data_len, M_HAMMER, M_WAITOK | M_ZERO);
                record->flags |= HAMMER_RECF_ALLOCDATA;
                ++hammer_count_record_datas;
        }

        return (record);
}

void
hammer_wait_mem_record_ident(hammer_record_t record, const char *ident)
{
        while (record->flush_state == HAMMER_FST_FLUSH) {
                record->flags |= HAMMER_RECF_WANTED;
                tsleep(record, 0, ident, 0);
        }
}

/*
 * Called from the backend, hammer_inode.c, after a record has been
 * flushed to disk.  The record has been exclusively locked by the
 * caller and interlocked with BE.
 *
 * We clean up the state, unlock, and release the record (the record
 * was referenced by the fact that it was in the HAMMER_FST_FLUSH state).
 */
void
hammer_flush_record_done(hammer_record_t record, int error)
{
        hammer_inode_t target_ip;

        KKASSERT(record->flush_state == HAMMER_FST_FLUSH);
        KKASSERT(record->flags & HAMMER_RECF_INTERLOCK_BE);

        if (error) {
                /*
                 * An error occurred, the backend was unable to sync the
                 * record to its media.  Leave the record intact.
                 */
                Debugger("flush_record_done error");
        }

        if (record->flags & HAMMER_RECF_DELETED_BE) {
                if ((target_ip = record->target_ip) != NULL) {
                        TAILQ_REMOVE(&target_ip->target_list, record,
                                     target_entry);
                        record->target_ip = NULL;
                        hammer_test_inode(target_ip);
                }
                record->flush_state = HAMMER_FST_IDLE;
        } else {
                if (record->target_ip) {
                        record->flush_state = HAMMER_FST_SETUP;
                        hammer_test_inode(record->ip);
                        hammer_test_inode(record->target_ip);
                } else {
                        record->flush_state = HAMMER_FST_IDLE;
                }
        }
        record->flags &= ~HAMMER_RECF_INTERLOCK_BE;
        if (record->flags & HAMMER_RECF_WANTED) {
                record->flags &= ~HAMMER_RECF_WANTED;
                wakeup(record);
        }
        hammer_rel_mem_record(record);
}

/*
 * Release a memory record.  Records marked for deletion are immediately
 * removed from the RB-Tree but otherwise left intact until the last ref
 * goes away.
 */
void
hammer_rel_mem_record(struct hammer_record *record)
{
        hammer_inode_t ip, target_ip;

        hammer_unref(&record->lock);

        if (record->lock.refs == 0) {
                /*
                 * Upon release of the last reference wakeup any waiters.
                 * The record structure may get destroyed so callers will
                 * loop up and do a relookup.
                 */
                ip = record->ip;

                /*
                 * Upon release of the last reference a record marked deleted
                 * is destroyed.
                 */
                if (record->flags & HAMMER_RECF_DELETED_FE) {
                        KKASSERT(record->flush_state != HAMMER_FST_FLUSH);

                        if ((target_ip = record->target_ip) != NULL) {
                                TAILQ_REMOVE(&target_ip->target_list,
                                             record, target_entry);
                                record->target_ip = NULL;
                                hammer_test_inode(target_ip);
                        }

                        if (record->flags & HAMMER_RECF_ONRBTREE) {
                                RB_REMOVE(hammer_rec_rb_tree,
                                          &record->ip->rec_tree,
                                          record);
                                KKASSERT(ip->rsv_recs > 0);
                                --ip->hmp->rsv_recs;
                                --ip->rsv_recs;
                                ip->hmp->rsv_databytes -= record->leaf.data_len;
                                record->flags &= ~HAMMER_RECF_ONRBTREE;

                                if ((ip->flags & HAMMER_INODE_PARTIALW) &&
                                    ip->rsv_recs <= hammer_limit_irecs) {
                                        ip->flags &= ~HAMMER_INODE_PARTIALW;
                                        wakeup(&ip->flags);
                                }
                                if (RB_EMPTY(&record->ip->rec_tree)) {
                                        record->ip->flags &= ~HAMMER_INODE_XDIRTY;
                                        record->ip->sync_flags &= ~HAMMER_INODE_XDIRTY;
                                        hammer_test_inode(record->ip);
                                }
                        }
                        if (record->flags & HAMMER_RECF_ALLOCDATA) {
                                --hammer_count_record_datas;
                                kfree(record->data, M_HAMMER);
                                record->flags &= ~HAMMER_RECF_ALLOCDATA;
                        }
                        if (record->resv) {
                                hammer_blockmap_reserve_complete(ip->hmp,
                                                                 record->resv);
                                record->resv = NULL;
                        }
                        record->data = NULL;
                        --hammer_count_records;
                        kfree(record, M_HAMMER);
                }
        }
}

/*
 * Record visibility depends on whether the record is being accessed by
 * the backend or the frontend.
 *
 * Return non-zero if the record is visible, zero if it isn't or if it is
 * deleted.
 */
static __inline
int
hammer_ip_iterate_mem_good(hammer_cursor_t cursor, hammer_record_t record)
{
        if (cursor->flags & HAMMER_CURSOR_BACKEND) {
                if (record->flags & HAMMER_RECF_DELETED_BE)
                        return(0);
        } else {
                if (record->flags & HAMMER_RECF_DELETED_FE)
                        return(0);
        }
        return(1);
}

/*
 * This callback is used as part of the RB_SCAN function for in-memory
 * records.  We terminate it (return -1) as soon as we get a match.
 *
 * This routine is used by frontend code.
 *
 * The primary compare code does not account for ASOF lookups.  This
 * code handles that case as well as a few others.
 */
static
int
hammer_rec_scan_callback(hammer_record_t rec, void *data)
{
        hammer_cursor_t cursor = data;

        /*
         * We terminate on success, so this should be NULL on entry.
         */
        KKASSERT(cursor->iprec == NULL);

        /*
         * Skip if the record was marked deleted.
         */
        if (hammer_ip_iterate_mem_good(cursor, rec) == 0)
                return(0);

        /*
         * Skip if not visible due to our as-of TID
         */
        if (cursor->flags & HAMMER_CURSOR_ASOF) {
                if (cursor->asof < rec->leaf.base.create_tid)
                        return(0);
                if (rec->leaf.base.delete_tid &&
                    cursor->asof >= rec->leaf.base.delete_tid) {
                        return(0);
                }
        }

        /*
         * If the record is queued to the flusher we have to block until
         * it isn't.  Otherwise we may see duplication between our memory
         * cache and the media.
         */
        hammer_ref(&rec->lock);

#warning "This deadlocks"
#if 0
        if (rec->flush_state == HAMMER_FST_FLUSH)
                hammer_wait_mem_record(rec);
#endif

        /*
         * The record may have been deleted while we were blocked.
         */
        if (hammer_ip_iterate_mem_good(cursor, rec) == 0) {
                hammer_rel_mem_record(rec);
                return(0);
        }

        /*
         * Set the matching record and stop the scan.
         */
        cursor->iprec = rec;
        return(-1);
}

/*
 * Lookup an in-memory record given the key specified in the cursor.  Works
 * just like hammer_btree_lookup() but operates on an inode's in-memory
 * record list.
 *
 * The lookup must fail if the record is marked for deferred deletion.
 */
static
int
hammer_mem_lookup(hammer_cursor_t cursor)
{
        int error;

        KKASSERT(cursor->ip);
        if (cursor->iprec) {
                hammer_rel_mem_record(cursor->iprec);
                cursor->iprec = NULL;
        }
        hammer_rec_rb_tree_RB_SCAN(&cursor->ip->rec_tree, hammer_rec_find_cmp,
                                   hammer_rec_scan_callback, cursor);

        if (cursor->iprec == NULL)
                error = ENOENT;
        else
                error = 0;
        return(error);
}

/*
 * hammer_mem_first() - locate the first in-memory record matching the
 * cursor within the bounds of the key range.
 */
static
int
hammer_mem_first(hammer_cursor_t cursor)
{
        hammer_inode_t ip;

        ip = cursor->ip;
        KKASSERT(ip != NULL);

        if (cursor->iprec) {
                hammer_rel_mem_record(cursor->iprec);
                cursor->iprec = NULL;
        }

        hammer_rec_rb_tree_RB_SCAN(&ip->rec_tree, hammer_rec_scan_cmp,
                                   hammer_rec_scan_callback, cursor);

        /*
         * Adjust scan.node and keep it linked into the RB-tree so we can
         * hold the cursor through third party modifications of the RB-tree.
         */
        if (cursor->iprec)
                return(0);
        return(ENOENT);
}

void
hammer_mem_done(hammer_cursor_t cursor)
{
        if (cursor->iprec) {
                hammer_rel_mem_record(cursor->iprec);
                cursor->iprec = NULL;
        }
}

/************************************************************************
 *                   HAMMER IN-MEMORY RECORD FUNCTIONS                 *
 ************************************************************************
 *
 * These functions manipulate in-memory records.  Such records typically
 * exist prior to being committed to disk or indexed via the on-disk B-Tree.
 */

/*
 * Add a directory entry (dip,ncp) which references inode (ip).
 *
 * Note that the low 32 bits of the namekey are set temporarily to create
 * a unique in-memory record, and may be modified a second time when the
 * record is synchronized to disk.  In particular, the low 32 bits cannot be
 * all 0's when synching to disk, which is not handled here.
 */
int
hammer_ip_add_directory(struct hammer_transaction *trans,
                     struct hammer_inode *dip, struct namecache *ncp,
                     struct hammer_inode *ip)
{
        hammer_record_t record;
        int error;
        int bytes;

        bytes = ncp->nc_nlen;   /* NOTE: terminating \0 is NOT included */
        record = hammer_alloc_mem_record(dip, HAMMER_ENTRY_SIZE(bytes));
        if (++trans->hmp->namekey_iterator == 0)
                ++trans->hmp->namekey_iterator;

        record->type = HAMMER_MEM_RECORD_ADD;
        record->leaf.base.localization = HAMMER_LOCALIZE_MISC;
        record->leaf.base.obj_id = dip->obj_id;
        record->leaf.base.key = hammer_directory_namekey(ncp->nc_name, bytes);
        record->leaf.base.key += trans->hmp->namekey_iterator;
        record->leaf.base.rec_type = HAMMER_RECTYPE_DIRENTRY;
        record->leaf.base.obj_type = ip->ino_leaf.base.obj_type;
        record->data->entry.obj_id = ip->obj_id;
        bcopy(ncp->nc_name, record->data->entry.name, bytes);

        ++ip->ino_data.nlinks;
        hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);

        /*
         * The target inode and the directory entry are bound together.
         */
        record->target_ip = ip;
        record->flush_state = HAMMER_FST_SETUP;
        TAILQ_INSERT_TAIL(&ip->target_list, record, target_entry);

        /*
         * The inode now has a dependency and must be taken out of the idle
         * state.  An inode not in an idle state is given an extra reference.
         */
        if (ip->flush_state == HAMMER_FST_IDLE) {
                hammer_ref(&ip->lock);
                ip->flush_state = HAMMER_FST_SETUP;
        }
        error = hammer_mem_add(record);
        return(error);
}
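
/*
 * Key construction above, with illustrative values: assuming
 * hammer_directory_namekey() hashes the name into the upper 32 bits of
 * the 64 bit key (low 32 bits zero), e.g. 0x1234567800000000, and the
 * mount's namekey_iterator is 5, the in-memory record gets key
 * 0x1234567800000005.  The low 32 bits are only provisional; they may
 * be regenerated when the record is synced to the B-Tree (see
 * hammer_ip_sync_record_cursor()).
 */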

/*
 * Delete the directory entry and update the inode link count.  The
 * cursor must be seeked to the directory entry record being deleted.
 *
 * The related inode should be share-locked by the caller.  The caller is
 * on the frontend.
 *
 * This function can return EDEADLK requiring the caller to terminate
 * the cursor, any locks, wait on the returned record, and retry.
 */
int
hammer_ip_del_directory(struct hammer_transaction *trans,
                     hammer_cursor_t cursor, struct hammer_inode *dip,
                     struct hammer_inode *ip)
{
        hammer_record_t record;
        int error;

        if (hammer_cursor_inmem(cursor)) {
                /*
                 * In-memory (unsynchronized) records can simply be freed.
                 * Even though the HAMMER_RECF_DELETED_FE flag is ignored
                 * by the backend, we must still avoid races against the
                 * backend potentially syncing the record to the media.
                 *
                 * We cannot call hammer_ip_delete_record(), that routine may
                 * only be called from the backend.
                 */
                record = cursor->iprec;
                if (record->flags & HAMMER_RECF_INTERLOCK_BE) {
                        KKASSERT(cursor->deadlk_rec == NULL);
                        hammer_ref(&record->lock);
                        cursor->deadlk_rec = record;
                        error = EDEADLK;
                } else {
                        KKASSERT(record->type == HAMMER_MEM_RECORD_ADD);
                        record->flags |= HAMMER_RECF_DELETED_FE;
                        error = 0;
                }
        } else {
                /*
                 * If the record is on-disk we have to queue the deletion by
                 * the record's key.  This also causes lookups to skip the
                 * record.
                 */
                KKASSERT(dip->flags &
                         (HAMMER_INODE_ONDISK | HAMMER_INODE_DONDISK));
                record = hammer_alloc_mem_record(dip, 0);
                record->type = HAMMER_MEM_RECORD_DEL;
                record->leaf.base = cursor->leaf->base;

                record->target_ip = ip;
                record->flush_state = HAMMER_FST_SETUP;
                TAILQ_INSERT_TAIL(&ip->target_list, record, target_entry);

                /*
                 * The inode now has a dependency and must be taken out of
                 * the idle state.  An inode not in an idle state is given
                 * an extra reference.
                 */
                if (ip->flush_state == HAMMER_FST_IDLE) {
                        hammer_ref(&ip->lock);
                        ip->flush_state = HAMMER_FST_SETUP;
                }

                error = hammer_mem_add(record);
        }

        /*
         * One less link.  The file may still be open in the OS even after
         * all links have gone away.
         *
         * We have to terminate the cursor before syncing the inode to
         * avoid deadlocking against ourselves.  XXX this may no longer
         * be true.
         *
         * If nlinks drops to zero and the vnode is inactive (or there is
         * no vnode), call hammer_inode_unloadable_check() to zonk the
         * inode.  If we don't do this here the inode will not be destroyed
         * on-media until we unmount.
         */
        if (error == 0) {
                --ip->ino_data.nlinks;
                hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
                if (ip->ino_data.nlinks == 0 &&
                    (ip->vp == NULL || (ip->vp->v_flag & VINACTIVE))) {
                        hammer_done_cursor(cursor);
                        hammer_inode_unloadable_check(ip, 1);
                        hammer_flush_inode(ip, 0);
                }
        }
        return(error);
}

/*
 * Add a record to an inode.
 *
 * The caller must allocate the record with hammer_alloc_mem_record(ip) and
 * initialize the following additional fields:
 *
 * The related inode should be share-locked by the caller.  The caller is
 * on the frontend.
 *
 * record->rec.entry.base.base.key
 * record->rec.entry.base.base.rec_type
 * record->rec.entry.base.base.data_len
 * record->data         (a copy will be kmalloc'd if it cannot be embedded)
 */
int
hammer_ip_add_record(struct hammer_transaction *trans, hammer_record_t record)
{
        hammer_inode_t ip = record->ip;
        int error;

        KKASSERT(record->leaf.base.localization != 0);
        record->leaf.base.obj_id = ip->obj_id;
        record->leaf.base.obj_type = ip->ino_leaf.base.obj_type;
        error = hammer_mem_add(record);
        return(error);
}
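
/*
 * Minimal sketch of the calling convention described above, assuming a
 * hypothetical caller with a small payload in buf/len (callers
 * elsewhere in HAMMER follow this pattern):
 *
 *      record = hammer_alloc_mem_record(ip, len);
 *      record->leaf.base.localization = HAMMER_LOCALIZE_MISC;
 *      record->leaf.base.key = ...;
 *      record->leaf.base.rec_type = ...;
 *      bcopy(buf, record->data, len);
 *      error = hammer_ip_add_record(trans, record);
 *
 * Note the KKASSERT above: localization must be set before the call.
 * hammer_ip_add_record() fills in obj_id/obj_type itself and eats the
 * record reference via hammer_mem_add().
 */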

/*
 * Locate a bulk record in-memory.  Bulk records allow disk space to be
 * reserved so the front-end can flush large data writes without having
 * to queue the BIO to the flusher.  Only the related record gets queued
 * to the flusher.
 */
static hammer_record_t
hammer_ip_get_bulk(hammer_inode_t ip, off_t file_offset, int bytes)
{
        hammer_record_t record;
        struct hammer_btree_leaf_elm leaf;

        bzero(&leaf, sizeof(leaf));
        leaf.base.obj_id = ip->obj_id;
        leaf.base.key = file_offset + bytes;
        leaf.base.create_tid = 0;
        leaf.base.delete_tid = 0;
        leaf.base.rec_type = HAMMER_RECTYPE_DATA;
        leaf.base.obj_type = 0;                         /* unused */
        leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;     /* unused */
        leaf.base.localization = HAMMER_LOCALIZE_MISC;
        leaf.data_len = bytes;

        record = hammer_rec_rb_tree_RB_LOOKUP_INFO(&ip->rec_tree, &leaf);
        if (record)
                hammer_ref(&record->lock);
        return(record);
}
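
/*
 * Example, illustrative numbers: looking up a bulk write of 16384
 * bytes at file_offset 0 builds a template leaf with key 16384 and
 * data_len 16384.  The RB_LOOKUP_INFO generated above uses
 * hammer_rec_overlap_compare(), so any cached record overlapping
 * [0, 16384) is returned, not just an exact key match.
 */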

/*
 * Reserve blockmap space placemarked with an in-memory record.
 *
 * This routine is called by the front-end in order to be able to directly
 * flush a buffer cache buffer.
 */
hammer_record_t
hammer_ip_add_bulk(hammer_inode_t ip, off_t file_offset, void *data, int bytes,
                   int *errorp)
{
        hammer_record_t record;
        hammer_record_t conflict;
        int zone;

        /*
         * Deal with conflicting in-memory records.  We cannot have multiple
         * in-memory records for the same offset without seriously confusing
         * the backend, including but not limited to the backend issuing
         * delete-create-delete sequences and asserting on the delete_tid
         * being the same as the create_tid.
         *
         * If we encounter a record with the backend interlock set we cannot
         * immediately delete it without confusing the backend.
         */
        while ((conflict = hammer_ip_get_bulk(ip, file_offset, bytes)) != NULL) {
                if (conflict->flags & HAMMER_RECF_INTERLOCK_BE) {
                        conflict->flags |= HAMMER_RECF_WANTED;
                        tsleep(conflict, 0, "hmrrc3", 0);
                        hammer_rel_mem_record(conflict);
                        continue;
                }
                conflict->flags |= HAMMER_RECF_DELETED_FE;
                hammer_rel_mem_record(conflict);
        }

        /*
         * Create a record to cover the direct write.  This is called with
         * the related BIO locked so there should be no possible conflict.
         *
         * The backend is responsible for finalizing the space reserved in
         * this record.
         *
         * XXX bytes not aligned, depend on the reservation code to
         * align the reservation.
         */
        record = hammer_alloc_mem_record(ip, 0);
        zone = (bytes >= HAMMER_BUFSIZE) ? HAMMER_ZONE_LARGE_DATA_INDEX :
                                           HAMMER_ZONE_SMALL_DATA_INDEX;
        record->resv = hammer_blockmap_reserve(ip->hmp, zone, bytes,
                                               &record->leaf.data_offset,
                                               errorp);
        if (record->resv == NULL) {
                kprintf("hammer_ip_add_bulk: reservation failed\n");
                hammer_rel_mem_record(record);
                return(NULL);
        }
        record->type = HAMMER_MEM_RECORD_DATA;
        record->leaf.base.rec_type = HAMMER_RECTYPE_DATA;
        record->leaf.base.obj_type = ip->ino_leaf.base.obj_type;
        record->leaf.base.obj_id = ip->obj_id;
        record->leaf.base.key = file_offset + bytes;
        record->leaf.base.localization = HAMMER_LOCALIZE_MISC;
        record->leaf.data_len = bytes;
        record->leaf.data_crc = crc32(data, bytes);

        hammer_ref(&record->lock);      /* mem_add eats a reference */
        *errorp = hammer_mem_add(record);
        KKASSERT(*errorp == 0);

        return (record);
}
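
/*
 * Sketch of the front-end direct-write flow this function supports
 * (simplified; the actual logic lives in the strategy and flusher
 * code):
 *
 *      record = hammer_ip_add_bulk(ip, offset, data, bytes, &error);
 *      if (record) {
 *              ... write the buffer directly to the reserved space at
 *                  record->leaf.data_offset ...
 *              ... queue only the record, not the BIO, to the flusher ...
 *      }
 *
 * The backend later inserts the B-Tree entry for the already-written
 * data in hammer_ip_sync_record_cursor().
 */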

/*
 * Frontend truncation code.  Scan in-memory records only.  On-disk records
 * and records in a flushing state are handled by the backend.  The vnops
 * setattr code will handle the block containing the truncation point.
 *
 * Partial blocks are not deleted.
 */
int
hammer_ip_frontend_trunc(struct hammer_inode *ip, off_t file_size)
{
        struct rec_trunc_info info;

        switch(ip->ino_data.obj_type) {
        case HAMMER_OBJTYPE_REGFILE:
                info.rec_type = HAMMER_RECTYPE_DATA;
                break;
        case HAMMER_OBJTYPE_DBFILE:
                info.rec_type = HAMMER_RECTYPE_DB;
                break;
        default:
                return(EINVAL);
        }
        info.trunc_off = file_size;
        hammer_rec_rb_tree_RB_SCAN(&ip->rec_tree, hammer_rec_trunc_cmp,
                                   hammer_rec_trunc_callback, &info);
        return(0);
}

static int
hammer_rec_trunc_callback(hammer_record_t record, void *data __unused)
{
        if (record->flags & HAMMER_RECF_DELETED_FE)
                return(0);
        if (record->flush_state == HAMMER_FST_FLUSH)
                return(0);
        KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
        hammer_ref(&record->lock);
        record->flags |= HAMMER_RECF_DELETED_FE;
        hammer_rel_mem_record(record);
        return(0);
}

/*
 * Backend code
 *
 * Sync data from a buffer cache buffer (typically) to the filesystem.  This
 * is called via the strategy call from a cached data source.  This code
 * is responsible for actually writing a data record out to the disk.
 *
 * This can only occur non-historically (i.e. 'current' data only).
 *
 * The file offset must be HAMMER_BUFSIZE aligned but the data length
 * can be truncated.  The record (currently) always represents a BUFSIZE
 * swath of space whether the data is truncated or not.
 */
int
hammer_ip_sync_data(hammer_cursor_t cursor, hammer_inode_t ip,
                    int64_t offset, void *data, int bytes)
{
        hammer_transaction_t trans = cursor->trans;
        struct hammer_btree_leaf_elm elm;
        hammer_off_t data_offset;
        void *bdata;
        int error;
        int aligned_bytes;

        KKASSERT((offset & HAMMER_BUFMASK) == 0);
        KKASSERT(trans->type == HAMMER_TRANS_FLS);
        KKASSERT(bytes != 0);

        /*
         * We don't have to do this but it's probably a good idea to
         * align data allocations to 16-byte boundaries for future
         * expansion.
         */
        aligned_bytes = (bytes + 15) & ~15;
retry:
        hammer_normalize_cursor(cursor);
        cursor->key_beg.localization = HAMMER_LOCALIZE_MISC;
        cursor->key_beg.obj_id = ip->obj_id;
        cursor->key_beg.key = offset + aligned_bytes;
        cursor->key_beg.create_tid = trans->tid;
        cursor->key_beg.delete_tid = 0;
        cursor->key_beg.rec_type = HAMMER_RECTYPE_DATA;
        cursor->asof = trans->tid;
        cursor->flags &= ~HAMMER_CURSOR_INITMASK;
        cursor->flags |= HAMMER_CURSOR_INSERT;
        cursor->flags |= HAMMER_CURSOR_BACKEND;

        /*
         * Issue a lookup to position the cursor.
         */
        error = hammer_btree_lookup(cursor);
        if (error == 0) {
                kprintf("hammer_ip_sync_data: duplicate data at "
                        "(%lld,%d) tid %016llx\n",
                        offset, aligned_bytes, trans->tid);
                hammer_print_btree_elm(&cursor->node->ondisk->
                                        elms[cursor->index],
                                        HAMMER_BTREE_TYPE_LEAF, cursor->index);
                panic("Duplicate data");
                error = EIO;
        }
        if (error != ENOENT)
                goto done;

        /*
         * Allocate our data.  The data buffer is not marked modified (yet).
         */
        bdata = hammer_alloc_data(trans, aligned_bytes, &data_offset,
                                  &cursor->data_buffer, &error);

        if (bdata == NULL)
                goto done;

        /*
         * Fill everything in and insert our B-Tree node.
         *
         * NOTE: hammer_alloc_data() has already marked the data buffer
         * as modified.  If we do it again we will generate unnecessary
         * undo elements.
         */
        elm.base.btype = HAMMER_BTREE_TYPE_RECORD;
        elm.base.localization = HAMMER_LOCALIZE_MISC;
        elm.base.obj_id = ip->obj_id;
        elm.base.key = offset + aligned_bytes;
        elm.base.create_tid = trans->tid;
        elm.base.delete_tid = 0;
        elm.base.rec_type = HAMMER_RECTYPE_DATA;
        elm.atime = 0;
        elm.data_offset = data_offset;
        elm.data_len = aligned_bytes;

        /*
         * Copy the data to the allocated buffer.  Since we are aligning
         * the record size as specified in elm.data_len, make sure to zero
         * out any extraneous bytes.
         */
        hammer_modify_buffer(trans, cursor->data_buffer, NULL, 0);
        bcopy(data, bdata, bytes);
        if (aligned_bytes > bytes)
                bzero((char *)bdata + bytes, aligned_bytes - bytes);
        hammer_modify_buffer_done(cursor->data_buffer);
        elm.data_crc = crc32(bdata, aligned_bytes);

        /*
         * Data records can wind up on-disk before the inode itself is
         * on-disk.  One must assume data records may be on-disk if either
         * HAMMER_INODE_DONDISK or HAMMER_INODE_ONDISK is set.
         */
        ip->flags |= HAMMER_INODE_DONDISK;

        error = hammer_btree_insert(cursor, &elm);
        if (error == 0)
                goto done;

        hammer_blockmap_free(trans, data_offset, aligned_bytes);
done:
        if (error == EDEADLK) {
                hammer_done_cursor(cursor);
                error = hammer_init_cursor(trans, cursor, &ip->cache[0], ip);
                if (error == 0)
                        goto retry;
        }
        return(error);
}
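
/*
 * Alignment arithmetic used above, worked example: a 200 byte write
 * rounds up to aligned_bytes = (200 + 15) & ~15 = 208, so 208 bytes
 * are allocated, the trailing 8 bytes are bzero'd out, and the record
 * key becomes offset + 208.
 */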

#if 0
/*
 * Backend code which actually performs the write to the media.  This
 * routine is typically called from the flusher.  The bio will be disposed
 * of (biodone'd) by this routine.
 *
 * Iterate the related records and mark for deletion.  If existing edge
 * records (left and right side) overlap our write they have to be marked
 * deleted and new records created, usually referencing a portion of the
 * original data.  Then add a record to represent the buffer.
 */
int
hammer_dowrite(hammer_cursor_t cursor, hammer_inode_t ip,
               off_t file_offset, void *data, int bytes)
{
        int error;

        KKASSERT(ip->flush_state == HAMMER_FST_FLUSH);

        /*
         * If the inode is going or gone, just throw away any frontend
         * buffers.
         */
        if (ip->flags & HAMMER_INODE_DELETED)
                return(0);

        /*
         * Delete any records overlapping our range.  This function will
         * (eventually) properly truncate partial overlaps.
         */
        if (ip->sync_ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
                error = hammer_ip_delete_range(cursor, ip, file_offset,
                                               file_offset, 0);
        } else {
                error = hammer_ip_delete_range(cursor, ip, file_offset,
                                               file_offset + bytes - 1, 0);
        }

        /*
         * Add a single record to cover the write.  We can write a record
         * with only the actual file data - for example, a small 200 byte
         * file does not have to write out a 16K record.
         *
         * While the data size does not have to be aligned, we still do it
         * to reduce fragmentation in a future allocation model.
         */
        if (error == 0) {
                int limit_size;

                if (ip->sync_ino_data.size - file_offset > bytes) {
                        limit_size = bytes;
                } else {
                        limit_size = (int)(ip->sync_ino_data.size -
                                           file_offset);
                        KKASSERT(limit_size >= 0);
                }
                if (limit_size) {
                        error = hammer_ip_sync_data(cursor, ip, file_offset,
                                                    data, limit_size);
                }
        }
        if (error)
                Debugger("hammer_dowrite: error");
        return(error);
}
#endif

/*
 * Backend code.  Sync a record to the media.
 */
int
hammer_ip_sync_record_cursor(hammer_cursor_t cursor, hammer_record_t record)
{
        hammer_transaction_t trans = cursor->trans;
        int64_t file_offset;
        void *bdata;
        int error;

        KKASSERT(record->flush_state == HAMMER_FST_FLUSH);
        KKASSERT(record->flags & HAMMER_RECF_INTERLOCK_BE);
        KKASSERT(record->leaf.base.localization != 0);

        /*
         * If this is a bulk-data record placemarker there may be an existing
         * record on-disk, indicating a data overwrite.  If there is, the
         * on-disk record must be deleted before we can insert our new record.
         *
         * We've synthesized this record and do not know what the create_tid
         * on-disk is, nor how much data it represents.
         *
         * Keep in mind that (key) for data records is (base_offset + len),
         * not (base_offset).  Also, we only want to get rid of on-disk
         * records since we are trying to sync our in-memory record, call
         * hammer_ip_delete_range() with truncating set to 1 to make sure
         * it skips in-memory records.
         *
         * It is ok for the lookup to return ENOENT.
         */
        if (record->type == HAMMER_MEM_RECORD_DATA) {
                file_offset = record->leaf.base.key - record->leaf.data_len;
                KKASSERT((file_offset & HAMMER_BUFMASK) == 0);
                error = hammer_ip_delete_range(
                                cursor, record->ip,
                                file_offset, file_offset + HAMMER_BUFSIZE - 1,
                                1);
                if (error && error != ENOENT)
                        goto done;
        }

        /*
         * Setup the cursor.
         */
        hammer_normalize_cursor(cursor);
        cursor->key_beg = record->leaf.base;
        cursor->flags &= ~HAMMER_CURSOR_INITMASK;
        cursor->flags |= HAMMER_CURSOR_BACKEND;
        cursor->flags &= ~HAMMER_CURSOR_INSERT;

        /*
         * Records can wind up on-media before the inode itself is on-media.
         * Flag the case.
         */
        record->ip->flags |= HAMMER_INODE_DONDISK;

        /*
         * If we are deleting a directory entry an exact match must be
         * found on-disk.
         */
        if (record->type == HAMMER_MEM_RECORD_DEL) {
                error = hammer_btree_lookup(cursor);
                if (error == 0) {
                        error = hammer_ip_delete_record(cursor, record->ip,
                                                        trans->tid);
                        if (error == 0) {
                                record->flags |= HAMMER_RECF_DELETED_FE;
                                record->flags |= HAMMER_RECF_DELETED_BE;
                        }
                }
                goto done;
        }

        /*
         * We are inserting.
         *
         * Issue a lookup to position the cursor and locate the cluster.  The
         * target key should not exist.  If we are creating a directory entry
         * we may have to iterate the low 32 bits of the key to find an unused
         * key.
         */
        cursor->flags |= HAMMER_CURSOR_INSERT;

        for (;;) {
                error = hammer_btree_lookup(cursor);
                if (hammer_debug_inode)
                        kprintf("DOINSERT LOOKUP %d\n", error);
                if (error)
                        break;
                if (record->leaf.base.rec_type != HAMMER_RECTYPE_DIRENTRY) {
                        kprintf("hammer_ip_sync_record: duplicate rec "
                                "at (%016llx)\n", record->leaf.base.key);
                        Debugger("duplicate record1");
                        error = EIO;
                        break;
                }
                if (++trans->hmp->namekey_iterator == 0)
                        ++trans->hmp->namekey_iterator;
                record->leaf.base.key &= ~(0xFFFFFFFFLL);
                record->leaf.base.key |= trans->hmp->namekey_iterator;
                cursor->key_beg.key = record->leaf.base.key;
        }
#if 0
        if (record->type == HAMMER_MEM_RECORD_DATA)
                kprintf("sync_record %016llx ---------------- %016llx %d\n",
                        record->leaf.base.key - record->leaf.data_len,
                        record->leaf.data_offset, error);
#endif

        if (error != ENOENT)
                goto done;

        /*
         * Allocate the record and data.  The result buffers will be
         * marked as being modified and further calls to
         * hammer_modify_buffer() will result in unneeded UNDO records.
         *
         * Support zero-fill records (data == NULL and data_len != 0).
         */
        if (record->type == HAMMER_MEM_RECORD_DATA) {
                /*
                 * The data portion of a bulk-data record has already been
                 * committed to disk, we need only adjust the layer2
                 * statistics in the same transaction as our B-Tree insert.
                 */
                KKASSERT(record->leaf.data_offset != 0);
                hammer_blockmap_free(trans, record->leaf.data_offset,
                                     -record->leaf.data_len);
                error = 0;
        } else if (record->data && record->leaf.data_len) {
                /*
                 * Wholly cached record, with data.  Allocate the data.
                 */
                bdata = hammer_alloc_data(trans, record->leaf.data_len,
                                          &record->leaf.data_offset,
                                          &cursor->data_buffer, &error);
                if (bdata == NULL)
                        goto done;
                record->leaf.data_crc = crc32(record->data,
                                              record->leaf.data_len);
                hammer_modify_buffer(trans, cursor->data_buffer, NULL, 0);
                bcopy(record->data, bdata, record->leaf.data_len);
                hammer_modify_buffer_done(cursor->data_buffer);
        } else {
                /*
                 * Wholly cached record, without data.
                 */
                record->leaf.data_offset = 0;
                record->leaf.data_crc = 0;
        }

        error = hammer_btree_insert(cursor, &record->leaf);
        if (hammer_debug_inode && error)
                kprintf("BTREE INSERT error %d @ %016llx:%d key %016llx\n",
                        error, cursor->node->node_offset, cursor->index,
                        record->leaf.base.key);

        /*
         * Our record is on-disk, normally mark the in-memory version as
         * deleted.  If the record represented a directory deletion but
         * we had to sync a valid directory entry to disk we must convert
         * the record to a covering delete so the frontend does not have
         * visibility on the synced entry.
         */
        if (error == 0) {
                if (record->flags & HAMMER_RECF_CONVERT_DELETE) {
                        KKASSERT(record->type == HAMMER_MEM_RECORD_ADD);
                        record->flags &= ~HAMMER_RECF_DELETED_FE;
                        record->type = HAMMER_MEM_RECORD_DEL;
                        KKASSERT(record->flush_state == HAMMER_FST_FLUSH);
                        record->flags &= ~HAMMER_RECF_CONVERT_DELETE;
                        /* hammer_flush_record_done takes care of the rest */
                } else {
                        record->flags |= HAMMER_RECF_DELETED_FE;
                        record->flags |= HAMMER_RECF_DELETED_BE;
                }
        } else {
                if (record->leaf.data_offset) {
                        hammer_blockmap_free(trans, record->leaf.data_offset,
                                             record->leaf.data_len);
                }
        }

done:
        return(error);
}

/*
 * Add the record to the inode's rec_tree.  The low 32 bits of a directory
 * entry's key is used to deal with hash collisions in the upper 32 bits.
 * A unique 64 bit key is generated in-memory and may be regenerated a
 * second time when the directory record is flushed to the on-disk B-Tree.
 *
 * A referenced record is passed to this function.  This function
 * eats the reference.  If an error occurs the record will be deleted.
 *
 * A copy of the temporary record->data pointer provided by the caller
 * will be made.
 */
static
int
hammer_mem_add(hammer_record_t record)
{
        hammer_mount_t hmp = record->ip->hmp;

        /*
         * Make a private copy of record->data
         */
        if (record->data)
                KKASSERT(record->flags & HAMMER_RECF_ALLOCDATA);

        /*
         * Insert into the RB tree, find an unused iterator if this is
         * a directory entry.
         */
        while (RB_INSERT(hammer_rec_rb_tree, &record->ip->rec_tree, record)) {
                if (record->leaf.base.rec_type != HAMMER_RECTYPE_DIRENTRY) {
                        record->flags |= HAMMER_RECF_DELETED_FE;
                        hammer_rel_mem_record(record);
                        return (EEXIST);
                }
                if (++hmp->namekey_iterator == 0)
                        ++hmp->namekey_iterator;
                record->leaf.base.key &= ~(0xFFFFFFFFLL);
                record->leaf.base.key |= hmp->namekey_iterator;
        }
        ++hmp->rsv_recs;
        ++record->ip->rsv_recs;
        record->ip->hmp->rsv_databytes += record->leaf.data_len;
        record->flags |= HAMMER_RECF_ONRBTREE;
        hammer_modify_inode(record->ip, HAMMER_INODE_XDIRTY);
        hammer_rel_mem_record(record);
        return(0);
}
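
/*
 * Collision handling above, worked example with illustrative keys: if
 * RB_INSERT() rejects a directory record whose key is
 * 0x1234567800000005 because that exact key is already present, the
 * loop bumps namekey_iterator and retries with 0x1234567800000006,
 * keeping the hash in the upper 32 bits intact.  Non-directory records
 * cannot be re-keyed, so a collision there returns EEXIST.
 */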

/************************************************************************
 *                 HAMMER INODE MERGED-RECORD FUNCTIONS                *
 ************************************************************************
 *
 * These functions augment the B-Tree scanning functions in hammer_btree.c
 * by merging in-memory records with on-disk records.
 */

/*
 * Locate a particular record either in-memory or on-disk.
 *
 * NOTE: This is basically a standalone routine, hammer_ip_next() may
 * NOT be called to iterate results.
 */
int
hammer_ip_lookup(hammer_cursor_t cursor)
{
        int error;

        /*
         * If the element is in-memory return it without searching the
         * on-disk B-Tree
         */
        KKASSERT(cursor->ip);
        error = hammer_mem_lookup(cursor);
        if (error == 0) {
                cursor->leaf = &cursor->iprec->leaf;
                return(error);
        }
        if (error != ENOENT)
                return(error);

        /*
         * If the inode has on-disk components search the on-disk B-Tree.
         */
        if ((cursor->ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DONDISK)) == 0)
                return(error);
        error = hammer_btree_lookup(cursor);
        if (error == 0)
                error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_LEAF);
        return(error);
}

/*
 * Locate the first record within the cursor's key_beg/key_end range,
 * restricted to a particular inode.  0 is returned on success, ENOENT
 * if no records matched the requested range, or some other error.
 *
 * When 0 is returned hammer_ip_next() may be used to iterate additional
 * records within the requested range.
 *
 * This function can return EDEADLK, requiring the caller to terminate
 * the cursor and try again.
 */
int
hammer_ip_first(hammer_cursor_t cursor)
{
        hammer_inode_t ip = cursor->ip;
        int error;

        KKASSERT(ip != NULL);

        /*
         * Clean up fields and setup for merged scan
         */
        cursor->flags &= ~HAMMER_CURSOR_DELBTREE;
        cursor->flags |= HAMMER_CURSOR_ATEDISK | HAMMER_CURSOR_ATEMEM;
        cursor->flags |= HAMMER_CURSOR_DISKEOF | HAMMER_CURSOR_MEMEOF;
        if (cursor->iprec) {
                hammer_rel_mem_record(cursor->iprec);
                cursor->iprec = NULL;
        }

        /*
         * Search the on-disk B-Tree.  hammer_btree_lookup() only does an
         * exact lookup so if we get ENOENT we have to call the iterate
         * function to validate the first record after the begin key.
         *
         * The ATEDISK flag is used by hammer_btree_iterate to determine
         * whether it must index forwards or not.  It is also used here
         * to select the next record from in-memory or on-disk.
         *
         * EDEADLK can only occur if the lookup hit an empty internal
         * element and couldn't delete it.  Since this could only occur
         * in-range, we can just iterate from the failure point.
         */
        if (ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DONDISK)) {
                error = hammer_btree_lookup(cursor);
                if (error == ENOENT || error == EDEADLK) {
                        cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
                        if (hammer_debug_general & 0x2000)
                                kprintf("error %d node %p %016llx index %d\n",
                                        error, cursor->node,
                                        cursor->node->node_offset,
                                        cursor->index);
                        error = hammer_btree_iterate(cursor);
                }
                if (error && error != ENOENT)
                        return(error);
                if (error == 0) {
                        cursor->flags &= ~HAMMER_CURSOR_DISKEOF;
                        cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
                } else {
                        cursor->flags |= HAMMER_CURSOR_ATEDISK;
                }
        }

        /*
         * Search the in-memory record list (Red-Black tree).  Unlike the
         * B-Tree search, mem_first checks for records in the range.
         */
        error = hammer_mem_first(cursor);
        if (error && error != ENOENT)
                return(error);
        if (error == 0) {
                cursor->flags &= ~HAMMER_CURSOR_MEMEOF;
                cursor->flags &= ~HAMMER_CURSOR_ATEMEM;
                if (hammer_ip_iterate_mem_good(cursor, cursor->iprec) == 0)
                        cursor->flags |= HAMMER_CURSOR_ATEMEM;
        }

        /*
         * This will return the first matching record.
         */
        return(hammer_ip_next(cursor));
}
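
/*
 * Typical caller pattern for the merged iteration, a sketch (cursor
 * key_beg/key_end setup omitted; hammer_ip_delete_range() below is a
 * complete in-tree example of this loop):
 *
 *      error = hammer_ip_first(cursor);
 *      while (error == 0) {
 *              ... examine cursor->leaf, or call
 *                  hammer_ip_resolve_data(cursor) to get the data ...
 *              error = hammer_ip_next(cursor);
 *      }
 *      if (error == ENOENT)
 *              error = 0;      (normal termination of the range)
 */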

/*
 * Retrieve the next record in a merged iteration within the bounds of the
 * cursor.  This call may be made multiple times after the cursor has been
 * initially searched with hammer_ip_first().
 *
 * 0 is returned on success, ENOENT if no further records match the
 * requested range, or some other error code is returned.
 */
int
hammer_ip_next(hammer_cursor_t cursor)
{
        hammer_btree_elm_t elm;
        hammer_record_t rec, save;
        int error;
        int r;

next_btree:
        /*
         * Load the current on-disk and in-memory record.  If we ate any
         * records we have to get the next one.
         *
         * If we deleted the last on-disk record we had scanned ATEDISK will
         * be clear and DELBTREE will be set, forcing a call to iterate.  The
         * fact that ATEDISK is clear causes iterate to re-test the 'current'
         * element.  If ATEDISK is set, iterate will skip the 'current'
         * element.
         *
         * Get the next on-disk record
         */
        if (cursor->flags & (HAMMER_CURSOR_ATEDISK|HAMMER_CURSOR_DELBTREE)) {
                if ((cursor->flags & HAMMER_CURSOR_DISKEOF) == 0) {
                        error = hammer_btree_iterate(cursor);
                        cursor->flags &= ~HAMMER_CURSOR_DELBTREE;
                        if (error == 0)
                                cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
                        else
                                cursor->flags |= HAMMER_CURSOR_DISKEOF |
                                                 HAMMER_CURSOR_ATEDISK;
                }
        }

next_memory:
        /*
         * Get the next in-memory record.  The record can be ripped out
         * of the RB tree so we maintain a scan_info structure to track
         * the next node.
         *
         * hammer_rec_scan_cmp:  Is the record still in our general range,
         *                       (non-inclusive of snapshot exclusions)?
         * hammer_rec_scan_callback: Is the record in our snapshot?
         */
        if (cursor->flags & HAMMER_CURSOR_ATEMEM) {
                if ((cursor->flags & HAMMER_CURSOR_MEMEOF) == 0) {
                        save = cursor->iprec;
                        cursor->iprec = NULL;
                        rec = save ? hammer_rec_rb_tree_RB_NEXT(save) : NULL;
                        while (rec) {
                                if (hammer_rec_scan_cmp(rec, cursor) != 0)
                                        break;
                                if (hammer_rec_scan_callback(rec, cursor) != 0)
                                        break;
                                rec = hammer_rec_rb_tree_RB_NEXT(rec);
                        }
                        if (save)
                                hammer_rel_mem_record(save);
                        if (cursor->iprec) {
                                KKASSERT(cursor->iprec == rec);
                                cursor->flags &= ~HAMMER_CURSOR_ATEMEM;
                        } else {
                                cursor->flags |= HAMMER_CURSOR_MEMEOF;
                        }
                }
        }

        /*
         * The memory record may have become stale while being held in
         * cursor->iprec.  We are interlocked against the backend with
         * regards to B-Tree entries.
         */
        if ((cursor->flags & HAMMER_CURSOR_ATEMEM) == 0) {
                if (hammer_ip_iterate_mem_good(cursor, cursor->iprec) == 0) {
                        cursor->flags |= HAMMER_CURSOR_ATEMEM;
                        goto next_memory;
                }
        }

        /*
         * Extract either the disk or memory record depending on their
         * relative position.
         */
        error = 0;
        switch(cursor->flags & (HAMMER_CURSOR_ATEDISK | HAMMER_CURSOR_ATEMEM)) {
        case 0:
                /*
                 * Both entries valid.  Compare the entries and nominally
                 * return the first one in the sort order.  Numerous cases
                 * require special attention, however.
                 */
                elm = &cursor->node->ondisk->elms[cursor->index];
                r = hammer_btree_cmp(&elm->base, &cursor->iprec->leaf.base);

                /*
                 * If the two entries differ only by their key (-2/2) or
                 * create_tid (-1/1), and are DATA records, we may have a
                 * nominal match.  We have to calculate the base file
                 * offset of the data.
                 */
                if (r <= 2 && r >= -2 && r != 0 &&
                    cursor->ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE &&
                    cursor->iprec->type == HAMMER_MEM_RECORD_DATA) {
                        int64_t base1 = elm->leaf.base.key - elm->leaf.data_len;
                        int64_t base2 = cursor->iprec->leaf.base.key -
                                        cursor->iprec->leaf.data_len;
                        if (base1 == base2) {
                                kprintf("G");
                                r = 0;
                        }
                }

                if (r < 0) {
                        error = hammer_btree_extract(cursor,
                                                     HAMMER_CURSOR_GET_LEAF);
                        cursor->flags |= HAMMER_CURSOR_ATEDISK;
                        break;
                }

                /*
                 * If the entries match exactly the memory entry is either
                 * an on-disk directory entry deletion or a bulk data
                 * overwrite.  If it is a directory entry deletion we eat
                 * both entries.
                 *
                 * For the bulk-data overwrite case it is possible to have
                 * visibility into both, which simply means the syncer
                 * hasn't gotten around to doing the delete+insert sequence
                 * on the B-Tree.  Use the memory entry and throw away the
                 * on-disk entry.
                 *
                 * If the in-memory record is not either of these we
                 * probably caught the syncer while it was syncing it to
                 * the media.  Since we hold a shared lock on the cursor,
                 * the in-memory record had better be marked deleted at
                 * this point.
                 */
                if (r == 0) {
                        if (cursor->iprec->type == HAMMER_MEM_RECORD_DEL) {
                                if ((cursor->flags & HAMMER_CURSOR_DELETE_VISIBILITY) == 0) {
                                        cursor->flags |= HAMMER_CURSOR_ATEDISK;
                                        cursor->flags |= HAMMER_CURSOR_ATEMEM;
                                        goto next_btree;
                                }
                        } else if (cursor->iprec->type == HAMMER_MEM_RECORD_DATA) {
                                if ((cursor->flags & HAMMER_CURSOR_DELETE_VISIBILITY) == 0) {
                                        cursor->flags |= HAMMER_CURSOR_ATEDISK;
                                }
                                /* fall through to memory entry */
                        } else {
                                panic("hammer_ip_next: duplicate mem/b-tree entry");
                                cursor->flags |= HAMMER_CURSOR_ATEMEM;
                                goto next_memory;
                        }
                }
                /* fall through to the memory entry */
        case HAMMER_CURSOR_ATEDISK:
                /*
                 * Only the memory entry is valid.
                 */
                cursor->leaf = &cursor->iprec->leaf;
                cursor->flags |= HAMMER_CURSOR_ATEMEM;

                /*
                 * If the memory entry is an on-disk deletion we should have
                 * also have found a B-Tree record.  If the backend beat us
                 * to it, it would have interlocked the cursor and we should
                 * have seen the in-memory record marked DELETED_FE.
                 */
                if (cursor->iprec->type == HAMMER_MEM_RECORD_DEL &&
                    (cursor->flags & HAMMER_CURSOR_DELETE_VISIBILITY) == 0) {
                        panic("hammer_ip_next: del-on-disk with no b-tree entry");
                }
                break;
        case HAMMER_CURSOR_ATEMEM:
                /*
                 * Only the disk entry is valid
                 */
                error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_LEAF);
                cursor->flags |= HAMMER_CURSOR_ATEDISK;
                break;
        default:
                /*
                 * Neither entry is valid
                 *
                 * XXX error not set properly
                 */
                cursor->leaf = NULL;
                error = ENOENT;
                break;
        }
        return(error);
}

/*
 * Resolve the cursor->data pointer for the current cursor position in
 * a merged iteration.
 */
int
hammer_ip_resolve_data(hammer_cursor_t cursor)
{
        hammer_record_t record;
        int error;

        if (hammer_cursor_inmem(cursor)) {
                /*
                 * The data associated with an in-memory record is usually
                 * kmalloced, but reserve-ahead data records will have an
                 * on-disk reference.
                 *
                 * NOTE: Reserve-ahead data records must be handled in the
                 * context of the related high level buffer cache buffer
                 * to interlock against async writes.
                 */
                record = cursor->iprec;
                cursor->data = record->data;
                error = 0;
                if (cursor->data == NULL) {
                        KKASSERT(record->leaf.base.rec_type ==
                                 HAMMER_RECTYPE_DATA);
                        cursor->data = hammer_bread(cursor->trans->hmp,
                                                    record->leaf.data_offset,
                                                    &error,
                                                    &cursor->data_buffer);
                }
        } else {
                cursor->leaf = &cursor->node->ondisk->elms[cursor->index].leaf;
                error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_DATA);
        }
        return(error);
}

/*
 * Backend truncation / record replacement - delete records in range.
 *
 * Delete all records within the specified range for inode ip.  In-memory
 * records still associated with the frontend are ignored.
 *
 * NOTE: An unaligned range will cause new records to be added to cover
 * the edge cases. (XXX not implemented yet).
 *
 * NOTE: Replacement via reservations (see hammer_ip_sync_record_cursor())
 * also do not deal with unaligned ranges.
 *
 * NOTE: ran_end is inclusive (e.g. 0,1023 instead of 0,1024).
 *
 * NOTE: Record keys for regular file data have to be special-cased since
 * they indicate the end of the range (key = base + bytes).
 */
int
hammer_ip_delete_range(hammer_cursor_t cursor, hammer_inode_t ip,
                       int64_t ran_beg, int64_t ran_end, int truncating)
{
        hammer_transaction_t trans = cursor->trans;
        hammer_btree_leaf_elm_t leaf;
        int error;
        int64_t off;

#if 0
        kprintf("delete_range %p %016llx-%016llx\n", ip, ran_beg, ran_end);
#endif

        KKASSERT(trans->type == HAMMER_TRANS_FLS);
retry:
        hammer_normalize_cursor(cursor);
        cursor->key_beg.localization = HAMMER_LOCALIZE_MISC;
        cursor->key_beg.obj_id = ip->obj_id;
        cursor->key_beg.create_tid = 0;
        cursor->key_beg.delete_tid = 0;
        cursor->key_beg.obj_type = 0;
        cursor->asof = ip->obj_asof;
        cursor->flags &= ~HAMMER_CURSOR_INITMASK;
        cursor->flags |= HAMMER_CURSOR_ASOF;
        cursor->flags |= HAMMER_CURSOR_DELETE_VISIBILITY;
        cursor->flags |= HAMMER_CURSOR_BACKEND;

        cursor->key_end = cursor->key_beg;
        if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
                cursor->key_beg.key = ran_beg;
                cursor->key_beg.rec_type = HAMMER_RECTYPE_DB;
                cursor->key_end.rec_type = HAMMER_RECTYPE_DB;
                cursor->key_end.key = ran_end;
        } else {
                /*
                 * The key in the B-Tree is (base+bytes), so the first possible
                 * matching key is ran_beg + 1.
                 */
                int64_t tmp64;

                cursor->key_beg.key = ran_beg + 1;
                cursor->key_beg.rec_type = HAMMER_RECTYPE_DATA;
                cursor->key_end.rec_type = HAMMER_RECTYPE_DATA;

                tmp64 = ran_end + MAXPHYS + 1;  /* work around GCC-4 bug */
                if (tmp64 < ran_end)
                        cursor->key_end.key = 0x7FFFFFFFFFFFFFFFLL;
                else
                        cursor->key_end.key = ran_end + MAXPHYS + 1;
        }
        cursor->flags |= HAMMER_CURSOR_END_INCLUSIVE;

        error = hammer_ip_first(cursor);

        /*
         * Iterate through matching records and mark them as deleted.
         */
        while (error == 0) {
                leaf = cursor->leaf;

                KKASSERT(leaf->base.delete_tid == 0);

                /*
                 * There may be overlap cases for regular file data.  Also
                 * remember the key for a regular file record is (base + len),
                 * NOT (base).
                 */
                if (leaf->base.rec_type == HAMMER_RECTYPE_DATA) {
                        off = leaf->base.key - leaf->data_len;
                        /*
                         * Check the left edge case.  We currently do not
                         * split existing records.
                         */
                        if (off < ran_beg) {
                                panic("hammer left edge case %016llx %d\n",
                                      leaf->base.key, leaf->data_len);
                        }

                        /*
                         * Check the right edge case.  Note that the
                         * record can be completely out of bounds, which
                         * terminates the search.
                         *
                         * base->key is exclusive of the right edge while
                         * ran_end is inclusive of the right edge.  The
                         * (key - data_len) left boundary is inclusive.
                         *
                         * XXX theory-check this test at some point, are
                         * we missing a + 1 somewhere?  Note that ran_end
                         * could overflow.
                         */
                        if (leaf->base.key - 1 > ran_end) {
                                if (leaf->base.key - leaf->data_len > ran_end)
                                        break;
                                panic("hammer right edge case\n");
                        }
                }

                /*
                 * Delete the record.  When truncating we do not delete
                 * in-memory (data) records because they represent data
                 * written after the truncation.
                 *
                 * This will also physically destroy the B-Tree entry and
                 * data if the retention policy dictates.  The function
                 * will set HAMMER_CURSOR_DELBTREE which hammer_ip_next()
                 * uses to perform a fixup.
                 */
                if (truncating == 0 || hammer_cursor_ondisk(cursor))
                        error = hammer_ip_delete_record(cursor, ip, trans->tid);
                if (error)
                        break;
                error = hammer_ip_next(cursor);
        }
        if (error == EDEADLK) {
                hammer_done_cursor(cursor);
                error = hammer_init_cursor(trans, cursor, &ip->cache[0], ip);
                if (error == 0)
                        goto retry;
        }
        if (error == ENOENT)
                error = 0;
        return(error);
}
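
/*
 * Range arithmetic above, worked example with illustrative numbers:
 * deleting file bytes [0, 16383] (ran_beg 0, ran_end 16383) sets
 * key_beg.key to 1, because the smallest possible DATA key in range is
 * a 1 byte record at offset 0 (key = base + bytes = 1).  key_end.key
 * extends to ran_end + MAXPHYS + 1 so a record whose data starts in
 * range but whose (base + len) key lands past ran_end is still
 * visited; the right-edge test in the loop then decides whether it is
 * really out of bounds.
 */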

/*
 * Backend truncation - delete all records.
 *
 * Delete all user records associated with an inode except the inode record
 * itself.  Directory entries are not deleted (they must be properly disposed
 * of or nlinks would get upset).
 */
int
hammer_ip_delete_range_all(hammer_cursor_t cursor, hammer_inode_t ip,
                           int *countp)
{
        hammer_transaction_t trans = cursor->trans;
        hammer_btree_leaf_elm_t leaf;
        int error;

        KKASSERT(trans->type == HAMMER_TRANS_FLS);
retry:
        hammer_normalize_cursor(cursor);
        cursor->key_beg.localization = HAMMER_LOCALIZE_MISC;
        cursor->key_beg.obj_id = ip->obj_id;
        cursor->key_beg.create_tid = 0;
        cursor->key_beg.delete_tid = 0;
        cursor->key_beg.obj_type = 0;
        cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE + 1;
        cursor->key_beg.key = HAMMER_MIN_KEY;

        cursor->key_end = cursor->key_beg;
        cursor->key_end.rec_type = 0xFFFF;
        cursor->key_end.key = HAMMER_MAX_KEY;

        cursor->asof = ip->obj_asof;
        cursor->flags &= ~HAMMER_CURSOR_INITMASK;
        cursor->flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
        cursor->flags |= HAMMER_CURSOR_DELETE_VISIBILITY;
        cursor->flags |= HAMMER_CURSOR_BACKEND;

        error = hammer_ip_first(cursor);

        /*
         * Iterate through matching records and mark them as deleted.
         */
        while (error == 0) {
                leaf = cursor->leaf;

                KKASSERT(leaf->base.delete_tid == 0);

                /*
                 * Mark the record and B-Tree entry as deleted.  This will
                 * also physically delete the B-Tree entry, record, and
                 * data if the retention policy dictates.  The function
                 * will set HAMMER_CURSOR_DELBTREE which hammer_ip_next()
                 * uses to perform a fixup.
                 *
                 * Directory entries (and delete-on-disk directory entries)
                 * must be synced and cannot be deleted.
                 */
                if (leaf->base.rec_type != HAMMER_RECTYPE_DIRENTRY) {
                        error = hammer_ip_delete_record(cursor, ip, trans->tid);
                        ++*countp;
                }
                if (error)
                        break;
                error = hammer_ip_next(cursor);
        }
        if (error == EDEADLK) {
                hammer_done_cursor(cursor);
                error = hammer_init_cursor(trans, cursor, &ip->cache[0], ip);
                if (error == 0)
                        goto retry;
        }
        if (error == ENOENT)
                error = 0;
        return(error);
}

/*
 * Delete the record at the current cursor.  On success the cursor will
 * be positioned appropriately for an iteration but may no longer be at
 * a leaf node.
 *
 * This routine is only called from the backend.
 *
 * NOTE: This can return EDEADLK, requiring the caller to terminate the
 * cursor and retry.
 */
int
hammer_ip_delete_record(hammer_cursor_t cursor, hammer_inode_t ip,
                        hammer_tid_t tid)
{
        hammer_btree_elm_t elm;
        hammer_mount_t hmp;
        int error;
        int dodelete;

        KKASSERT(cursor->flags & HAMMER_CURSOR_BACKEND);
        KKASSERT(tid != 0);

        /*
         * In-memory (unsynchronized) records can simply be freed.  This
         * only occurs in range iterations since all other records are
         * individually synchronized.  Thus there should be no confusion with
         * the interlock.
         */
        if (hammer_cursor_inmem(cursor)) {
                KKASSERT((cursor->iprec->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
                cursor->iprec->flags |= HAMMER_RECF_DELETED_FE;
                cursor->iprec->flags |= HAMMER_RECF_DELETED_BE;
                return(0);
        }

        /*
         * On-disk records are marked as deleted by updating their delete_tid.
         * This does not affect their position in the B-Tree (which is based
         * on their create_tid).
         */
        error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_LEAF);
        elm = NULL;
        hmp = cursor->node->hmp;

        /*
         * If we were mounted with the nohistory option, we physically
         * delete the record.
         */
        dodelete = hammer_nohistory(ip);

        if (error == 0) {
                error = hammer_cursor_upgrade(cursor);
                if (error == 0) {
                        elm = &cursor->node->ondisk->elms[cursor->index];
                        hammer_modify_node(cursor->trans, cursor->node,
                                           &elm->leaf.base.delete_tid,
                                           sizeof(elm->leaf.base.delete_tid));
                        elm->leaf.base.delete_tid = tid;
                        hammer_modify_node_done(cursor->node);

                        /*
                         * An on-disk record cannot have the same delete_tid
                         * as its create_tid.  In a chain of record updates
                         * this could result in a duplicate record.
                         */
                        KKASSERT(elm->leaf.base.delete_tid !=
                                 elm->leaf.base.create_tid);
                }
        }

        if (error == 0 && dodelete) {
                error = hammer_delete_at_cursor(cursor, NULL);
                if (error) {
                        panic("hammer_ip_delete_record: unable to physically delete the record!\n");
                        error = 0;
                }
        }
        return(error);
}

int
hammer_delete_at_cursor(hammer_cursor_t cursor, int64_t *stat_bytes)
{
        hammer_btree_elm_t elm;
        hammer_off_t data_offset;
        int32_t data_len;
        u_int16_t rec_type;
        int error;

        elm = &cursor->node->ondisk->elms[cursor->index];
        KKASSERT(elm->base.btype == HAMMER_BTREE_TYPE_RECORD);

        data_offset = elm->leaf.data_offset;
        data_len = elm->leaf.data_len;
        rec_type = elm->leaf.base.rec_type;

        error = hammer_btree_delete(cursor);
        if (error == 0) {
                /*
                 * This forces a fixup for the iteration because
                 * the cursor is now either sitting at the 'next'
                 * element or sitting at the end of a leaf.
                 */
                if ((cursor->flags & HAMMER_CURSOR_DISKEOF) == 0) {
                        cursor->flags |= HAMMER_CURSOR_DELBTREE;
                        cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
                }
        }
        if (error == 0) {
                switch(data_offset & HAMMER_OFF_ZONE_MASK) {
                case HAMMER_ZONE_LARGE_DATA:
                case HAMMER_ZONE_SMALL_DATA:
                        hammer_blockmap_free(cursor->trans,
                                             data_offset, data_len);
                        break;
                default:
                        break;
                }
        }
        return (error);
}

/*
 * Determine whether we can remove a directory.  This routine checks whether
 * a directory is empty or not and enforces flush connectivity.
 *
 * Flush connectivity requires that we block if the target directory is
 * currently flushing, otherwise it may not end up in the same flush group.
 *
 * Returns 0 on success, ENOTEMPTY or EDEADLK (or other errors) on failure.
 */
int
hammer_ip_check_directory_empty(hammer_transaction_t trans, hammer_inode_t ip)
{
        struct hammer_cursor cursor;
        int error;

        /*
         * Check directory empty
         */
        hammer_init_cursor(trans, &cursor, &ip->cache[0], ip);

        cursor.key_beg.localization = HAMMER_LOCALIZE_MISC;
        cursor.key_beg.obj_id = ip->obj_id;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.obj_type = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE + 1;
        cursor.key_beg.key = HAMMER_MIN_KEY;

        cursor.key_end = cursor.key_beg;
        cursor.key_end.rec_type = 0xFFFF;
        cursor.key_end.key = HAMMER_MAX_KEY;

        cursor.asof = ip->obj_asof;
        cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;

        error = hammer_ip_first(&cursor);
        if (error == ENOENT)
                error = 0;
        else if (error == 0)
                error = ENOTEMPTY;
        hammer_done_cursor(&cursor);
        return(error);
}